Import packages¶

In [1]:
import sys
print(sys.version)

import numpy as np
import matplotlib.pyplot as plt
import torch
from torch import nn
from rasterio.plot import show
from datetime import datetime

import pandas as pd
import rasterio
import seaborn as sns

# Get today's date
today_date = datetime.today().strftime('%Y-%m-%d')
3.12.5 | packaged by Anaconda, Inc. | (main, Sep 12 2024, 18:18:29) [MSC v.1929 64 bit (AMD64)]

Import the tabular data¶

In [2]:
buffalo_id = 2005
n_samples = 10297 # 2005 has 10297 samples

# buffalo_id = 2018
# n_samples = 9440 # 2018 has 9440 samples

# Specify the path to your CSV file
csv_file_path = f'../buffalo_local_data_id/buffalo_{buffalo_id}_data_df_lag_1hr_n{n_samples}.csv'

# Read the CSV file into a DataFrame
buffalo_df = pd.read_csv(csv_file_path)

# Display the first few rows of the DataFrame
print(buffalo_df.head())
             x_            y_                    t_    id           x1_  \
0  41969.310875 -1.435671e+06  2018-07-25T01:04:23Z  2005  41969.310875   
1  41921.521939 -1.435654e+06  2018-07-25T02:04:39Z  2005  41921.521939   
2  41779.439594 -1.435601e+06  2018-07-25T03:04:17Z  2005  41779.439594   
3  41841.203272 -1.435635e+06  2018-07-25T04:04:39Z  2005  41841.203272   
4  41655.463332 -1.435604e+06  2018-07-25T05:04:27Z  2005  41655.463332   

            y1_           x2_           y2_     x2_cent    y2_cent  ...  \
0 -1.435671e+06  41921.521939 -1.435654e+06  -47.788936  16.857110  ...   
1 -1.435654e+06  41779.439594 -1.435601e+06 -142.082345  53.568427  ...   
2 -1.435601e+06  41841.203272 -1.435635e+06   61.763677 -34.322938  ...   
3 -1.435635e+06  41655.463332 -1.435604e+06 -185.739939  31.003534  ...   
4 -1.435604e+06  41618.651923 -1.435608e+06  -36.811409  -4.438037  ...   

    bearing  bearing_sin  bearing_cos        ta    cos_ta         x_min  \
0  2.802478     0.332652    -0.943050  1.367942  0.201466  40706.810875   
1  2.781049     0.352783    -0.935705 -0.021429  0.999770  40659.021939   
2 -0.507220    -0.485749     0.874098  2.994917 -0.989262  40516.939594   
3  2.976198     0.164641    -0.986354 -2.799767 -0.942144  40578.703272   
4 -3.021610    -0.119695    -0.992811  0.285377  0.959556  40392.963332   

          x_max         y_min         y_max  ndvi_index  
0  43231.810875 -1.436934e+06 -1.434409e+06           8  
1  43184.021939 -1.436917e+06 -1.434392e+06           8  
2  43041.939594 -1.436863e+06 -1.434338e+06           8  
3  43103.703272 -1.436898e+06 -1.434373e+06           8  
4  42917.963332 -1.436867e+06 -1.434342e+06           8  

[5 rows x 32 columns]

Importing spatial data¶

Global layers¶

NDVI¶

In [3]:
file_path = '../mapping/cropped rasters/ndvi_2018-19_late_dry.tif'
file_path_ndvi_aug2018 = '../mapping/cropped rasters/ndvi_aug_2018.tif'
file_path_ndvi_feb2019 = '../mapping/cropped rasters/ndvi_feb_2019.tif'

# read the raster file
with rasterio.open(file_path) as src:
    # Read the raster band as separate variable
    ndvi_global = src.read(1)
    # Get the metadata of the raster
    ndvi_meta = src.meta
    raster_transform = src.transform

with rasterio.open(file_path_ndvi_aug2018) as src:
    # Read the raster band as separate variable
    ndvi_aug2018 = src.read(1)
    # Get the metadata of the raster
    ndvi_meta_aug2018 = src.meta

with rasterio.open(file_path_ndvi_feb2019) as src:
    # Read the raster band as separate variable
    ndvi_feb2019 = src.read(1)
    # Get the metadata of the raster
    ndvi_meta_feb2019 = src.meta
In [4]:
print(ndvi_meta)
print(raster_transform)
print(ndvi_global.shape)

# Replace NaNs in the original array with -1, which represents water
ndvi_global = np.nan_to_num(ndvi_global, nan=-1.0)
ndvi_aug2018 = np.nan_to_num(ndvi_aug2018, nan=-1.0)
ndvi_feb2019 = np.nan_to_num(ndvi_feb2019, nan=-1.0)

# create water mask
water_mask = ndvi_global == -1.0

# from the stack of local layers
ndvi_max = 0.8220
ndvi_min = -0.2772

ndvi_global_tens = torch.from_numpy(ndvi_global)
ndvi_aug2018_tens = torch.from_numpy(ndvi_aug2018)
ndvi_feb2019_tens = torch.from_numpy(ndvi_feb2019)

# Normalizing the data
ndvi_global_norm = (ndvi_global_tens - ndvi_min) / (ndvi_max - ndvi_min)
ndvi_aug2018_norm = (ndvi_aug2018_tens - ndvi_min) / (ndvi_max - ndvi_min)
ndvi_feb2019_norm = (ndvi_feb2019_tens - ndvi_min) / (ndvi_max - ndvi_min)

plt.imshow(ndvi_global_norm.numpy())
plt.colorbar()  
plt.show()

plt.imshow(ndvi_aug2018_norm.numpy())
plt.colorbar()
plt.show()

plt.imshow(ndvi_feb2019_norm.numpy())
plt.colorbar()
plt.show()
{'driver': 'GTiff', 'dtype': 'float32', 'nodata': nan, 'width': 2400, 'height': 2280, 'count': 1, 'crs': CRS.from_epsg(3112), 'transform': Affine(25.0, 0.0, 0.0,
       0.0, -25.0, -1406000.0)}
| 25.00, 0.00, 0.00|
| 0.00,-25.00,-1406000.00|
| 0.00, 0.00, 1.00|
(2280, 2400)
No description has been provided for this image
No description has been provided for this image
No description has been provided for this image

Canopy cover¶

In [5]:
file_path = '../mapping/cropped rasters/canopy_cover.tif'
# read the raster file
with rasterio.open(file_path) as src:
    # Read the raster band as separate variable
    canopy_global = src.read(1)
    # Get the metadata of the raster
    canopy_meta = src.meta
In [6]:
print(canopy_meta)
print(canopy_global.shape)

# from the stack of local layers
canopy_max = 82.5000
canopy_min = 0.0

canopy_global_tens = torch.from_numpy(canopy_global)

# Normalizing the data
canopy_global_norm = (canopy_global_tens - canopy_min) / (canopy_max - canopy_min)

plt.imshow(canopy_global_norm.numpy())
plt.colorbar()  
plt.show()
{'driver': 'GTiff', 'dtype': 'float32', 'nodata': -3.3999999521443642e+38, 'width': 2400, 'height': 2280, 'count': 1, 'crs': CRS.from_epsg(3112), 'transform': Affine(25.0, 0.0, 0.0,
       0.0, -25.0, -1406000.0)}
(2280, 2400)
No description has been provided for this image

Herbaceous vegetation¶

In [7]:
file_path = '../mapping/cropped rasters/veg_herby.tif'
# read the raster file
with rasterio.open(file_path) as src:
    # Read the raster band as separate variable
    herby_global = src.read(1)
    # Get the metadata of the raster
    herby_meta = src.meta
In [8]:
print(herby_meta)
print(herby_global.shape)

# from the stack of local layers
herby_max = 1.0
herby_min = 0.0

herby_global_tens = torch.from_numpy(herby_global)

# Normalizing the data
herby_global_norm = (herby_global_tens - herby_min) / (herby_max - herby_min)

plt.imshow(herby_global_norm.numpy())
plt.colorbar()  
plt.show()
{'driver': 'GTiff', 'dtype': 'float32', 'nodata': -3.3999999521443642e+38, 'width': 2400, 'height': 2280, 'count': 1, 'crs': CRS.from_epsg(3112), 'transform': Affine(25.0, 0.0, 0.0,
       0.0, -25.0, -1406000.0)}
(2280, 2400)
No description has been provided for this image

Slope¶

In [9]:
file_path = '../mapping/cropped rasters/slope.tif'
# read the raster file
with rasterio.open(file_path) as src:
    # Read the raster band as separate variable
    slope_global = src.read(1)
    # Get the metadata of the raster
    slope_meta = src.meta
    slope_transform = src.transform # same as the raster transform in the NDVI raster read

print(slope_global)
print(slope_transform)
[[       nan        nan        nan ...        nan        nan        nan]
 [       nan 0.3352837  0.39781624 ... 0.         0.                nan]
 [       nan 0.3983888  0.48142004 ... 0.         0.                nan]
 ...
 [       nan 2.215875   1.9798415  ... 1.5078747  1.268342          nan]
 [       nan 1.9740707  1.7354656  ... 1.697194   1.4880029         nan]
 [       nan        nan        nan ...        nan        nan        nan]]
| 25.00, 0.00, 0.00|
| 0.00,-25.00,-1406000.00|
| 0.00, 0.00, 1.00|
In [10]:
print(slope_meta)
print(slope_global.shape)

# Replace NaNs in the original array with -1, which represents water
slope_global = np.nan_to_num(slope_global, nan=0.0)

# from the stack of local layers
slope_max = 12.2981
slope_min = 0.0006

slope_global_tens = torch.from_numpy(slope_global)

# Normalizing the data
slope_global_norm = (slope_global_tens - slope_min) / (slope_max - slope_min)

plt.imshow(slope_global_tens.numpy())
plt.colorbar()  
plt.show()
{'driver': 'GTiff', 'dtype': 'float32', 'nodata': nan, 'width': 2400, 'height': 2280, 'count': 1, 'crs': CRS.from_epsg(3112), 'transform': Affine(25.0, 0.0, 0.0,
       0.0, -25.0, -1406000.0)}
(2280, 2400)
No description has been provided for this image

Testing functions for selecting subsets of the raster layer, using torch objects

In [11]:
# Create a figure and axis with matplotlib
fig, ax = plt.subplots(figsize=(7.5, 7.5))

# Plot the raster
show(slope_global, transform=raster_transform, ax=ax, cmap='viridis')

# Set the title and labels
ax.set_title('Raster with Geographic Coordinates')
ax.set_xlabel('Longitude')
ax.set_ylabel('Latitude')

# Show the plot
plt.show()
No description has been provided for this image
In [12]:
x, y = 5.9e4, -1.447e6
print(x, y)

# Convert geographic coordinates to pixel coordinates
px, py = ~raster_transform * (x, y)
# Round pixel coordinates to integers
px, py = int(round(px)), int(round(py))

# Print the pixel coordinates	
print(px, py)
59000.0 -1447000.0
2360 1640

Running the model on the layers¶

Set the device for the model¶

In [13]:
# train on the GPU or on the CPU, if a GPU is not available
device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')
print(f"Using {device} device")
Using cpu device

Define the model¶

In [14]:
class Conv2d_block_toFC(nn.Module):
    def __init__(self, params):
        super(Conv2d_block_toFC, self).__init__()
        self.batch_size = params.batch_size
        self.input_channels = params.input_channels
        self.output_channels = params.output_channels
        self.kernel_size = params.kernel_size
        self.stride = params.stride
        self.kernel_size_mp = params.kernel_size_mp
        self.stride_mp = params.stride_mp
        self.padding = params.padding
        self.image_dim = params.image_dim
        self.device = params.device

        self.conv2d = nn.Sequential(
        nn.Conv2d(in_channels=self.input_channels, out_channels=self.output_channels, kernel_size=self.kernel_size, stride=self.stride, padding=self.padding),
        nn.ReLU(),
        nn.MaxPool2d(kernel_size=self.kernel_size_mp, stride=self.stride_mp),
        nn.Conv2d(in_channels=self.output_channels, out_channels=self.output_channels, kernel_size=self.kernel_size, stride=self.stride, padding=self.padding),
        nn.ReLU(),
        nn.MaxPool2d(kernel_size=self.kernel_size_mp, stride=self.stride_mp),
        nn.Flatten())

    def forward(self, x):
        return self.conv2d(x)
    

class Conv2d_block_spatial(nn.Module):
    def __init__(self, params):
        super(Conv2d_block_spatial, self).__init__()
        self.batch_size = params.batch_size
        self.input_channels = params.input_channels
        self.output_channels = params.output_channels
        self.kernel_size = params.kernel_size
        self.stride = params.stride
        # self.kernel_size_mp = params.kernel_size_mp
        # self.stride_mp = params.stride_mp
        self.padding = params.padding
        self.image_dim = params.image_dim
        self.device = params.device

        self.conv2d = nn.Sequential(
        nn.Conv2d(in_channels=self.input_channels, out_channels=self.output_channels, kernel_size=self.kernel_size, stride=self.stride, padding=self.padding),
        nn.ReLU(),
        nn.Conv2d(in_channels=self.output_channels, out_channels=self.output_channels, kernel_size=self.kernel_size, stride=self.stride, padding=self.padding),
        nn.ReLU(),
        nn.Conv2d(in_channels=self.output_channels, out_channels=1, kernel_size=self.kernel_size, stride=self.stride, padding=self.padding)
        )

    def forward(self, x):
        conv2d_spatial = self.conv2d(x).squeeze(dim = 1)
        conv2d_spatial = conv2d_spatial - torch.logsumexp(conv2d_spatial, dim = (1, 2), keepdim = True)
        return conv2d_spatial


class FCN_block_all_habitat(nn.Module):
    def __init__(self, params):
        super(FCN_block_all_habitat, self).__init__()
        self.batch_size = params.batch_size
        self.dense_dim_in_all = params.dense_dim_in_all
        self.dense_dim_hidden = params.dense_dim_hidden
        self.dense_dim_out = params.dense_dim_out
        self.image_dim = params.image_dim
        self.device = params.device
        self.dropout = params.dropout

        self.ffn = nn.Sequential(
            nn.Linear(self.dense_dim_in_all, self.dense_dim_hidden),
            nn.Dropout(self.dropout),
            nn.ReLU(),
            nn.Linear(self.dense_dim_hidden, self.dense_dim_hidden),
            nn.Dropout(self.dropout),
            nn.ReLU(),
            nn.Linear(self.dense_dim_hidden, self.image_dim * self.image_dim)
        )
    
    def forward(self, x):
        return self.ffn(x)


class FCN_block_all_movement(nn.Module):
    def __init__(self, params):
        super(FCN_block_all_movement, self).__init__()
        self.batch_size = params.batch_size
        self.dense_dim_in_all = params.dense_dim_in_all
        self.dense_dim_hidden = params.dense_dim_hidden
        self.dense_dim_out = params.dense_dim_out
        self.image_dim = params.image_dim
        self.device = params.device
        self.num_movement_params = params.num_movement_params
        self.dropout = params.dropout

        self.ffn = nn.Sequential(
            nn.Linear(self.dense_dim_in_all, self.dense_dim_hidden),
            nn.Dropout(self.dropout),
            nn.ReLU(),
            nn.Linear(self.dense_dim_hidden, self.dense_dim_hidden),
            nn.Dropout(self.dropout),
            nn.ReLU(),
            nn.Linear(self.dense_dim_hidden, self.num_movement_params)
        )
    
    def forward(self, x):
        return self.ffn(x)



##################################################
# Mixture of two Gamma and von Mises distributions with the von Mises mu parameters allowed to vary
##################################################

class Params_to_Grid_Block(nn.Module):
    def __init__(self, params):
        super(Params_to_Grid_Block, self).__init__()
        self.batch_size = params.batch_size
        self.image_dim = params.image_dim
        self.pixel_size = params.pixel_size
        self.center = self.image_dim // 2
        y, x = np.indices((self.image_dim, self.image_dim))
        self.distance_layer = torch.from_numpy(np.sqrt((self.pixel_size*(x - self.center))**2 + (self.pixel_size*(y - self.center))**2))
        # change the centre cell to the average distance from the centre to the edge of the pixel
        self.distance_layer[self.center, self.center] = 0.56*self.pixel_size # average distance from the centre to the perimeter of the pixel (accounting for longer distances at the corners)
        # self.bearing_layer = torch.from_numpy(np.arctan2(y - self.center, x - self.center))
        self.bearing_layer = torch.from_numpy(np.arctan2(self.center - y, x - self.center))
        self.device = params.device


    # Gamma desnities for the mixture distribution
    def gamma_density(self, x, shape, scale):
        # Ensure all tensors are on the same device as x
        shape = shape.to(x.device)
        scale = scale.to(x.device)
        return -1*torch.lgamma(shape) -shape*torch.log(scale) + (shape - 1)*torch.log(x) - x/scale

    # von Mises densities for the mixture distribution
    def vonmises_density(self, x, kappa, vm_mu):
        # Ensure all tensors are on the same device as x
        kappa = kappa.to(x.device)
        vm_mu = vm_mu.to(x.device)
        return kappa*torch.cos(x - vm_mu) - 1*(np.log(2*torch.pi) + torch.log(torch.special.i0(kappa)))
    

    def forward(self, x, bearing):

        # parameters of the first mixture distribution
        gamma_shape1 = torch.exp(x[:, 0]).unsqueeze(0).unsqueeze(0)
        gamma_shape1 = gamma_shape1.repeat(self.image_dim, self.image_dim, 1)
        gamma_shape1 = gamma_shape1.permute(2, 0, 1)

        gamma_scale1 = torch.exp(x[:, 1]).unsqueeze(0).unsqueeze(0)
        gamma_scale1 = gamma_scale1.repeat(self.image_dim, self.image_dim, 1)
        gamma_scale1 = gamma_scale1.permute(2, 0, 1)

        gamma_weight1 = torch.exp(x[:, 2]).unsqueeze(0).unsqueeze(0)
        gamma_weight1 = gamma_weight1.repeat(self.image_dim, self.image_dim, 1)
        gamma_weight1 = gamma_weight1.permute(2, 0, 1)

        # parameters of the second mixture distribution
        gamma_shape2 = torch.exp(x[:, 3]).unsqueeze(0).unsqueeze(0)
        gamma_shape2 = gamma_shape2.repeat(self.image_dim, self.image_dim, 1)
        gamma_shape2 = gamma_shape2.permute(2, 0, 1)

        gamma_scale2 = torch.exp(x[:, 4]).unsqueeze(0).unsqueeze(0)
        gamma_scale2 = gamma_scale2 * 500 ### to transform the scale parameter to be near 1
        gamma_scale2 = gamma_scale2.repeat(self.image_dim, self.image_dim, 1)
        gamma_scale2 = gamma_scale2.permute(2, 0, 1)

        gamma_weight2 = torch.exp(x[:, 5]).unsqueeze(0).unsqueeze(0)
        gamma_weight2 = gamma_weight2.repeat(self.image_dim, self.image_dim, 1)
        gamma_weight2 = gamma_weight2.permute(2, 0, 1)

        # Apply softmax to the weights
        gamma_weights = torch.stack([gamma_weight1, gamma_weight2], dim=0)
        gamma_weights = torch.nn.functional.softmax(gamma_weights, dim=0)
        gamma_weight1 = gamma_weights[0]
        gamma_weight2 = gamma_weights[1]

        # calculation of Gamma densities
        gamma_density_layer1 = self.gamma_density(self.distance_layer, gamma_shape1, gamma_scale1).to(device)
        gamma_density_layer2 = self.gamma_density(self.distance_layer, gamma_shape2, gamma_scale2).to(device)

        # combining both densities to create a mixture distribution using logsumexp
        logsumexp_gamma_corr = torch.max(gamma_density_layer1, gamma_density_layer2)
        gamma_density_layer = logsumexp_gamma_corr + torch.log(gamma_weight1 * torch.exp(gamma_density_layer1 - logsumexp_gamma_corr) + gamma_weight2 * torch.exp(gamma_density_layer2 - logsumexp_gamma_corr))


        ## Von Mises Distributions

        # calculate the new bearing from the turning angle
        # takes in the bearing from the previous step and adds the turning angle
        bearing_new1 = x[:, 6] + bearing[:, 0]

        # the new bearing becomes the mean of the von Mises distribution
        # the estimated parameter [x:, 6] is the turning angle of the next step
        # which is always in reference to the input bearing
        vonmises_mu1 = bearing_new1.unsqueeze(0).unsqueeze(0)
        vonmises_mu1 = vonmises_mu1.repeat(self.image_dim, self.image_dim, 1)
        vonmises_mu1 = vonmises_mu1.permute(2, 0, 1)

        # parameters of the first von Mises distribution
        vonmises_kappa1 = torch.exp(x[:, 7]).unsqueeze(0).unsqueeze(0)
        vonmises_kappa1 = vonmises_kappa1.repeat(self.image_dim, self.image_dim, 1)
        vonmises_kappa1 = vonmises_kappa1.permute(2, 0, 1)

        vonmises_weight1 = torch.exp(x[:, 8]).unsqueeze(0).unsqueeze(0)
        vonmises_weight1 = vonmises_weight1.repeat(self.image_dim, self.image_dim, 1)
        vonmises_weight1 = vonmises_weight1.permute(2, 0, 1)

        # vm_mu and weight for the second von Mises distribution
        bearing_new2 = x[:, 9] + bearing[:, 0]

        vonmises_mu2 = bearing_new2.unsqueeze(0).unsqueeze(0)
        vonmises_mu2 = vonmises_mu2.repeat(self.image_dim, self.image_dim, 1)
        vonmises_mu2 = vonmises_mu2.permute(2, 0, 1)

        # parameters of the second von Mises distribution
        vonmises_kappa2 = torch.exp(x[:, 10]).unsqueeze(0).unsqueeze(0)
        vonmises_kappa2 = vonmises_kappa2.repeat(self.image_dim, self.image_dim, 1)
        vonmises_kappa2 = vonmises_kappa2.permute(2, 0, 1)

        vonmises_weight2 = torch.exp(x[:, 11]).unsqueeze(0).unsqueeze(0)
        vonmises_weight2 = vonmises_weight2.repeat(self.image_dim, self.image_dim, 1)
        vonmises_weight2 = vonmises_weight2.permute(2, 0, 1)

        # Apply softmax to the weights
        vonmises_weights = torch.stack([vonmises_weight1, vonmises_weight2], dim=0)
        vonmises_weights = torch.nn.functional.softmax(vonmises_weights, dim=0)
        vonmises_weight1 = vonmises_weights[0]
        vonmises_weight2 = vonmises_weights[1]

        # calculation of von Mises densities
        vonmises_density_layer1 = self.vonmises_density(self.bearing_layer, vonmises_kappa1, vonmises_mu1).to(device)
        vonmises_density_layer2 = self.vonmises_density(self.bearing_layer, vonmises_kappa2, vonmises_mu2).to(device)

        # combining both densities to create a mixture distribution using the logsumexp trick
        logsumexp_vm_corr = torch.max(vonmises_density_layer1, vonmises_density_layer2)
        vonmises_density_layer = logsumexp_vm_corr + torch.log(vonmises_weight1 * torch.exp(vonmises_density_layer1 - logsumexp_vm_corr) + vonmises_weight2 * torch.exp(vonmises_density_layer2 - logsumexp_vm_corr))
        
        # combining the two distributions
        movement_grid = gamma_density_layer + vonmises_density_layer # Gamma and von Mises densities are on the log-scale
        # normalise the movement predictions
        movement_grid = movement_grid - torch.logsumexp(movement_grid, dim = (1, 2), keepdim = True)

        return movement_grid
    

class Scalar_to_Grid_Block(nn.Module):
    def __init__(self, params):
        super(Scalar_to_Grid_Block, self).__init__()
        self.batch_size = params.batch_size
        self.image_dim = params.image_dim
        self.device = params.device
    
    def forward(self, x):
        num_scalars = x.shape[1]
        scalar_map = x.view(x.shape[0], num_scalars, 1, 1).expand(x.shape[0], num_scalars, self.image_dim, self.image_dim)
        return scalar_map


class ConvJointModel(nn.Module):
    def __init__(self, params):
        super(ConvJointModel, self).__init__()

        self.scalar_grid_output = Scalar_to_Grid_Block(params)
        self.conv_habitat = Conv2d_block_spatial(params)

        self.conv_movement = Conv2d_block_toFC(params)
        self.fcn_movement_all = FCN_block_all_movement(params)
        self.movement_grid_output = Params_to_Grid_Block(params)
        self.device = params.device

    def forward(self, x):
        spatial_data_x = x[0]
        scalars_to_grid = x[1]
        bearing_x = x[2]

        # SCALAR GRIDS
        scalar_grids = self.scalar_grid_output(scalars_to_grid)
        all_spatial = torch.cat([spatial_data_x, scalar_grids], dim = 1)
        # print(f"Shape after scalar grid: {all_spatial.shape}")  # Debugging print


        # HABITAT SELECTION
        output_habitat = self.conv_habitat(all_spatial)
        # print(f"Shape after CNN habitat: {output_habitat.shape}")  # Debugging print


        # MOVEMENT
        conv_movement = self.conv_movement(all_spatial)
        # print(f"Shape after CNN to FC movement: {conv_movement.shape}")  # Debugging print

        output_movement = self.fcn_movement_all(conv_movement)
        # print(f"Shape after fcn_movement_all: {output_movement.shape}")  # Debugging print
        output_movement = self.movement_grid_output(output_movement, bearing_x)
        # print(f"Shape after CNN movement: {output_movement.shape}")  # Debugging print

        # combine the habitat and movement predictions
        output = torch.stack((output_habitat, output_movement), dim = -1)
        return output
    

class ModelParams():
    def __init__(self, dict_params):
        self.batch_size = dict_params["batch_size"]
        self.image_dim = dict_params["image_dim"]
        self.pixel_size = dict_params["pixel_size"]
        self.batch_size = dict_params["batch_size"]
        self.dim_in_nonspatial_to_grid = dict_params["dim_in_nonspatial_to_grid"]
        self.dense_dim_in_nonspatial = dict_params["dense_dim_in_nonspatial"]
        self.dense_dim_hidden = dict_params["dense_dim_hidden"]
        self.dense_dim_out = dict_params["dense_dim_out"]
        self.batch_size = dict_params["batch_size"]
        self.dense_dim_in_all = dict_params["dense_dim_in_all"]
        self.dense_dim_hidden = dict_params["dense_dim_hidden"]
        self.dense_dim_out = dict_params["dense_dim_out"]
        self.batch_size = dict_params["batch_size"]
        self.input_channels = dict_params["input_channels"]
        self.output_channels = dict_params["output_channels"]
        self.kernel_size = dict_params["kernel_size"]
        self.stride = dict_params["stride"]
        self.kernel_size_mp = dict_params["kernel_size_mp"]
        self.stride_mp = dict_params["stride_mp"]
        self.padding = dict_params["padding"]
        self.image_dim = dict_params["image_dim"]
        self.num_movement_params = dict_params["num_movement_params"]
        self.dropout = dict_params["dropout"]
        self.device = dict_params["device"]

Instantiate the model¶

In [15]:
params_dict = {"batch_size": 32,
               "image_dim": 101, #number of pixels along the edge of each local patch/image
               "pixel_size": 25, #number of metres along the edge of a pixel
               "dim_in_nonspatial_to_grid": 4, #the number of scalar predictors that are converted to a grid and appended to the spatial features
               "dense_dim_in_nonspatial": 4, #change this to however many other scalar predictors you have (bearing, velocity etc)
               "dense_dim_hidden": 128, #number of nodes in the hidden layers
               "dense_dim_out": 128, #number of nodes in the output of the fully connected block (FCN)
               "dense_dim_in_all": 2500,# + 128, #number of inputs entering the fully connected block once the nonspatial features have been concatenated to the spatial features
               "input_channels": 4 + 4, #number of spatial layers in each image + number of scalar layers that are converted to a grid
               "output_channels": 4, #number of filters to learn
               "kernel_size": 3, #the size of the 2D moving windows / kernels that are being learned
               "stride": 1, #the stride used when applying the kernel.  This reduces the dimension of the output if set to greater than 1
               "kernel_size_mp": 2, #the size of the kernel that is used in max pooling operations
               "stride_mp": 2, #the stride that is used in max pooling operations
               "padding": 1, #the amount of padding to apply to images prior to applying the 2D convolution
               "num_movement_params": 12, #number of parameters used to parameterise the movement kernel
               "dropout": 0.1,
               "device": device
               }

params = ModelParams(params_dict)
model = ConvJointModel(params).to(device)
# print(model)
In [28]:
# check which id is specified
print(buffalo_id)

date = '2024-09-29'

# # load the model weights
# print(model.state_dict())
model.load_state_dict(torch.load(f'model_checkpoints/checkpoint_CNN_global_buffalo{buffalo_id}_TAmix_bearing-rev_adj_hab-covs_grid-only_{date}.pt', map_location=torch.device('cpu')))
# print(model.state_dict())
# model.eval()
2005
C:\Users\for329\AppData\Local\Temp\ipykernel_36736\3609159849.py:8: FutureWarning: You are using `torch.load` with `weights_only=False` (the current default value), which uses the default pickle module implicitly. It is possible to construct malicious pickle data which will execute arbitrary code during unpickling (See https://github.com/pytorch/pytorch/blob/main/SECURITY.md#untrusted-models for more details). In a future release, the default value for `weights_only` will be flipped to `True`. This limits the functions that could be executed during unpickling. Arbitrary objects will no longer be allowed to be loaded via this mode unless they are explicitly allowlisted by the user via `torch.serialization.add_safe_globals`. We recommend you start setting `weights_only=True` for any use case where you don't have full control of the loaded file. Please open an issue on GitHub for any issues related to this experimental feature.
  model.load_state_dict(torch.load(f'model_checkpoints/checkpoint_CNN_global_buffalo{buffalo_id}_TAmix_bearing-rev_adj_hab-covs_grid-only_{date}.pt', map_location=torch.device('cpu')))
Out[28]:
<All keys matched successfully>

Run CNN component on landscape¶

Select a smaller extent of the landscape¶

In [29]:
# from the buffalo data
buffer = 1250
min_x = min(buffalo_df['x_']) - buffer
max_x = max(buffalo_df['x_']) + buffer
min_y = min(buffalo_df['y_']) - buffer
max_y = max(buffalo_df['y_']) + buffer

# custom extent
# min_x = 28148.969145
# max_x = 47719.496935
# min_y = -1442210.335861
# max_y = -1433133.681746

# Convert geographic coordinates to pixel coordinates
min_px, min_py = ~raster_transform * (min_x, min_y)
print(min_px, min_py)
max_px, max_py = ~raster_transform * (max_x, max_y)
print(max_px, max_py)
# Round pixel coordinates to integers
min_px, max_px, min_py, max_py = int(round(min_px)), int(round(max_px)), int(round(min_py)), int(round(max_py))

# Print the pixel coordinates	
print(min_px, max_px, min_py, max_py)
1091.3389860694217 1546.6350942641802
2166.104503472696 888.6517715825685
1091 2166 1547 889

Subset all layers¶

In [30]:
# Initialize the subset array with zeros (or any other padding value)
# subset = np.zeros((max_px - min_px, min_py - max_py), dtype=slope_global.dtype)
subset = np.zeros((min_py - max_py, max_px - min_px), dtype=slope_global.dtype)

# subset = slope_global[min_px:max_px, min_py:max_py]
ndvi_subset = ndvi_global_norm[max_py:min_py, min_px:max_px]
ndvi_aug2018_subset = ndvi_aug2018_norm[max_py:min_py, min_px:max_px]
ndvi_feb2019_subset = ndvi_feb2019_norm[max_py:min_py, min_px:max_px]
canopy_subset = canopy_global_norm[max_py:min_py, min_px:max_px]
herby_subset = herby_global_norm[max_py:min_py, min_px:max_px]
slope_subset = slope_global_norm[max_py:min_py, min_px:max_px]

ndvi_subset_natural = ndvi_subset * (ndvi_max - ndvi_min) + ndvi_min
ndvi_aug2018_subset_natural = ndvi_aug2018_subset * (ndvi_max - ndvi_min) + ndvi_min
ndvi_feb2019_subset_natural = ndvi_feb2019_subset * (ndvi_max - ndvi_min) + ndvi_min
canopy_subset_natural = canopy_subset * (canopy_max - canopy_min) + canopy_min
herby_subset_natural = herby_subset * (herby_max - herby_min) + herby_min
slope_subset_natural = slope_subset * (slope_max - slope_min) + slope_min

# Find the global min and max values across all subsets
vmin = min(ndvi_subset_natural.min(), ndvi_aug2018_subset_natural.min(), ndvi_feb2019_subset_natural.min())
print(vmin)
vmax = max(ndvi_subset_natural.max(), ndvi_aug2018_subset_natural.max(), ndvi_feb2019_subset_natural.max())
print(vmax)



# Plot the subsets with the same color scale
plt.imshow(ndvi_subset_natural, cmap='viridis', vmin=vmin, vmax=vmax)
plt.colorbar(shrink=0.7)
plt.title(f'NDVI Late-dry season average 2018-19')
plt.savefig(f"outputs/dl_prob_maps_landscape/{buffalo_id}/id{buffalo_id}_ndvi_latedry_2018-19.png", dpi=600, bbox_inches='tight')
plt.show()
plt.close()  # Close the figure to free memory

plt.imshow(ndvi_aug2018_subset_natural, cmap='viridis', vmin=vmin, vmax=vmax)
plt.colorbar(shrink=0.7)
plt.title(f'NDVI August 2018')
plt.savefig(f"outputs/dl_prob_maps_landscape/{buffalo_id}/id{buffalo_id}_ndvi_aug2018.png", dpi=600, bbox_inches='tight')
plt.show()
plt.close()  # Close the figure to free memory

plt.imshow(ndvi_feb2019_subset_natural, cmap='viridis', vmin=vmin, vmax=vmax)
plt.colorbar(shrink=0.7)
plt.title(f'NDVI February 2019')
plt.savefig(f"outputs/dl_prob_maps_landscape/{buffalo_id}/id{buffalo_id}_ndvi_feb2019.png", dpi=600, bbox_inches='tight')
plt.show()
plt.close()  # Close the figure to free memory

# plot the subset
plt.imshow(canopy_subset_natural, cmap='viridis')
plt.colorbar(shrink=0.7)
plt.title(f'Canopy cover')
plt.savefig(f"outputs/dl_prob_maps_landscape/{buffalo_id}/id{buffalo_id}_canopy_cover.png", dpi=600, bbox_inches='tight')
plt.show()
plt.close()  # Close the figure to free memory

# plot the subset
plt.imshow(herby_subset_natural, cmap='viridis')
plt.colorbar(shrink=0.7)
plt.title(f'Herbaceous vegetation')
plt.savefig(f"outputs/dl_prob_maps_landscape/{buffalo_id}/id{buffalo_id}_herby_veg.png", dpi=600, bbox_inches='tight')
plt.show()
plt.close()  # Close the figure to free memory

# plot the subset
plt.imshow(slope_subset_natural, cmap='viridis')
plt.colorbar(shrink=0.7)
plt.title(f'Slope')
plt.savefig(f"outputs/dl_prob_maps_landscape/{buffalo_id}/id{buffalo_id}_slope.png", dpi=600, bbox_inches='tight')
plt.show()
plt.close()  # Close the figure to free memory
tensor(-0.2894)
tensor(0.7881)
No description has been provided for this image
No description has been provided for this image
No description has been provided for this image
No description has been provided for this image
No description has been provided for this image
No description has been provided for this image

Select layer extent¶

In [31]:
# full extent
# Assuming ndvi_global_norm, canopy_global_norm, herby_global_norm, slope_global_norm, and params are already defined
# ndvi_landscape = ndvi_global_norm.to(params.device)
# print(ndvi_landscape.shape)
# canopy_landscape = canopy_global_norm.to(params.device)
# herby_landscape = herby_global_norm.to(params.device)
# slope_landscape = slope_global_norm.to(params.device)

# subset extent
# Assuming ndvi_subset, canopy_subset, herby_subset, slope_subset, and params are already defined

# ndvi_landscape = ndvi_subset.to(params.device)
ndvi_landscape = ndvi_aug2018_subset.to(params.device)
# ndvi_landscape = ndvi_feb2019_subset.to(params.device)
print(ndvi_landscape.shape)

canopy_landscape = canopy_subset.to(params.device)
herby_landscape = herby_subset.to(params.device)
slope_landscape = slope_subset.to(params.device)
torch.Size([658, 1075])
In [32]:
# hour of the day (hour) 
hour_t2 = 6
# convert to sine and cosine
hour_t2_sin = np.sin(2*np.pi*hour_t2/24)
hour_t2_cos = np.cos(2*np.pi*hour_t2/24)

# day of the year (yday)
yday_t2 = 225
# convert to sine and cosine
yday_t2_sin = np.sin(2*np.pi*yday_t2/365)
yday_t2_cos = np.cos(2*np.pi*yday_t2/365)

# Convert lists to PyTorch tensors
hour_t2_sin_tensor = torch.tensor(hour_t2_sin).float().unsqueeze(0)
hour_t2_cos_tensor = torch.tensor(hour_t2_cos).float().unsqueeze(0)
yday_t2_sin_tensor = torch.tensor(yday_t2_sin).float().unsqueeze(0)
yday_t2_cos_tensor = torch.tensor(yday_t2_cos).float().unsqueeze(0)

def scalar_to_grid(x, dim_x, dim_y):
    scalar_map = x.view(1, 1, 1, 1).expand(1, 1, dim_x, dim_y)
    return scalar_map

# Stack tensors column-wise to create a tensor of shape 
scalar_covariates = torch.stack([hour_t2_sin_tensor, hour_t2_cos_tensor, yday_t2_sin_tensor, yday_t2_cos_tensor], dim=0)
print(scalar_covariates.shape)

scalar_grids = torch.cat([scalar_to_grid(tensor, ndvi_landscape.shape[0], ndvi_landscape.shape[1]) for tensor in scalar_covariates], dim=1)
print(scalar_grids.shape)
torch.Size([4, 1])
torch.Size([1, 4, 658, 1075])
In [33]:
# Stack the channels along a new axis; here, 1 is commonly used for channel axis in PyTorch
landscape_stack = torch.stack([ndvi_landscape, canopy_landscape, herby_landscape, slope_landscape], dim=0)
landscape_stack = landscape_stack.unsqueeze(0)
print(landscape_stack.shape)

full_stack = torch.cat([landscape_stack, scalar_grids], dim=1)
print(full_stack.shape)
torch.Size([1, 4, 658, 1075])
torch.Size([1, 8, 658, 1075])
In [34]:
landscape_predictions = model.conv_habitat(full_stack)
print(landscape_predictions.shape)
torch.Size([1, 658, 1075])
In [35]:
# Assuming the output has the shape [batch_size, channels, height, width]
# For simplicity, let's visualize the first image in the batch and its first channel
output_image = landscape_predictions[0].detach().cpu().numpy()

# Create the mask for x and y coordinates
x_mask = np.ones_like(output_image)
y_mask = np.ones_like(output_image)
water_mask = np.ones_like(output_image)

y_dim = output_image.shape[0]
x_dim = output_image.shape[1]

buffer = 3

# Create the mask for x and y coordinates
x_mask[:, :buffer] = -np.inf
x_mask[:, x_dim-buffer:] = -np.inf
y_mask[:buffer, :] = -np.inf
y_mask[y_dim-buffer:, :] = -np.inf


# mask out cells on the edges that affect the colour scale
output_image = output_image * x_mask * y_mask
# output_image[ndvi_global == -1.0] = -np.inf

# Plot the image using matplotlib
filename_landscape_preds = f"outputs/dl_prob_maps_landscape/{buffalo_id}/id{buffalo_id}_hab_log_prob_norm_yday{yday_t2}_hour{hour_t2}.png"
# filename_landscape_preds = f"outputs/dl_prob_maps_landscape/id{buffalo_id}_hab_log_prob_norm_{hour_t2}_{today_date}.png"
plt.imshow(output_image, cmap='viridis')  # Use 'gray' for grayscale or change as needed
plt.colorbar()  # Optional: show the color scale bar
plt.title(f'Day of the year: {yday_t2}, Hour: {hour_t2}')
plt.savefig(filename_landscape_preds, dpi=600, bbox_inches='tight')
plt.show()
plt.close()  # Close the figure to free memory

# Plot the image using matplotlib
filename_landscape_preds = f"outputs/dl_prob_maps_landscape/{buffalo_id}/id{buffalo_id}_hab_log_prob_norm_yday{yday_t2}_hour{hour_t2}.png"
# filename_landscape_preds = f"outputs/dl_prob_maps_landscape/id{buffalo_id}_hab_log_prob_norm_{hour_t2}_{today_date}.png"
plt.imshow(np.exp(output_image), cmap='viridis')  # Use 'gray' for grayscale or change as needed
plt.colorbar()  # Optional: show the color scale bar
plt.title(f'Day of the year: {yday_t2}, Hour: {hour_t2}')
plt.savefig(filename_landscape_preds, dpi=600, bbox_inches='tight')
plt.show()
plt.close()  # Close the figure to free memory
No description has been provided for this image
No description has been provided for this image

Extracting values of the predictions to assess influence of landscape features¶

In [36]:
import torch
import pandas as pd
import matplotlib.pyplot as plt

# Assuming ndvi_landscape, canopy_landscape, herby_landscape, slope_landscape, and landscape_predictions are already defined tensors
# Flatten the tensors and convert to numpy arrays
ndvi_array = ndvi_landscape.detach().numpy()
ndvi_array = ndvi_array * x_mask * y_mask
ndvi_array = ndvi_array.flatten()

canopy_array = canopy_landscape.detach().numpy()
canopy_array = canopy_array * x_mask * y_mask
canopy_array = canopy_array.flatten()

herby_array = herby_landscape.detach().numpy()
herby_array = herby_array * x_mask * y_mask
herby_array = herby_array.flatten()

slope_array = slope_landscape.detach().numpy()
slope_array = slope_array * x_mask * y_mask
slope_array = slope_array.flatten()

landscape_predictions_array = torch.exp(landscape_predictions).detach().numpy()
landscape_predictions_array = landscape_predictions_array * x_mask * y_mask
landscape_predictions_array = landscape_predictions_array.flatten()


# Create a single DataFrame with updated column names
df = pd.DataFrame({
    'NDVI': ndvi_array,
    'Canopy_cover': canopy_array,
    'Herbaceous_vegetation': herby_array,
    'Slope': slope_array,
    'Predictions': landscape_predictions_array
})

# Export the DataFrame to a CSV file
df.to_csv(f"outputs/id{buffalo_id}_habitat_suitability_landscape_subset.csv", index=False)

# Plot the data with plt.scatter
plt.figure(figsize=(10, 6))
plt.scatter(df['NDVI'], df['Predictions'], alpha=0.5)
plt.title('NDVI vs Predictions')
plt.xlabel('NDVI')
plt.ylabel('Predictions')
plt.grid(True)
plt.show()

plt.figure(figsize=(10, 6))
plt.scatter(df['Canopy_cover'], df['Predictions'], alpha=0.5)
plt.title('Canopy cover vs Predictions')
plt.xlabel('Canopy cover')
plt.ylabel('Predictions')
plt.grid(True)
plt.show()

plt.figure(figsize=(10, 6))
plt.scatter(df['Herbaceous_vegetation'], df['Predictions'], alpha=0.5)
plt.title('Herbaceous vegetation vs Predictions')
plt.xlabel('Herbaceous vegetation')
plt.ylabel('Predictions')
plt.grid(True)
plt.show()

plt.figure(figsize=(10, 6))
plt.scatter(df['Slope'], df['Predictions'], alpha=0.5)
plt.title('Slope vs Predictions')
plt.xlabel('Slope')
plt.ylabel('Predictions')
plt.grid(True)
plt.show()
C:\Users\for329\AppData\Local\Temp\ipykernel_36736\68050730.py:12: RuntimeWarning: invalid value encountered in multiply
  canopy_array = canopy_array * x_mask * y_mask
C:\Users\for329\AppData\Local\Temp\ipykernel_36736\68050730.py:16: RuntimeWarning: invalid value encountered in multiply
  herby_array = herby_array * x_mask * y_mask
No description has been provided for this image
No description has been provided for this image
No description has been provided for this image
No description has been provided for this image

Together to loop over hours¶

In [37]:
# Initialize global min and max values
global_vmin = float('inf')
global_vmax = float('-inf')
In [38]:
# Create a single DataFrame with updated column names
df_hourly = pd.DataFrame({
    'NDVI': ndvi_array,
    'Canopy_cover': canopy_array,
    'Herbaceous_vegetation': herby_array,
    'Slope': slope_array
})

# Define the range of hours you want to loop over
hours = range(1,25) # to start at 1
# hours = np.arange(0,24, 0.1)

# Initialize lists to store results
scalar_grids_list = []
landscape_predictions_list = []

for hour_t2 in hours:
    # convert to sine and cosine
    hour_t2_sin = np.sin(2*np.pi*hour_t2/24)
    hour_t2_cos = np.cos(2*np.pi*hour_t2/24)

    # day of the year (yday)
    # yday_t2 = 45
    # convert to sine and cosine
    yday_t2_sin = np.sin(2*np.pi*yday_t2/365)
    yday_t2_cos = np.cos(2*np.pi*yday_t2/365)

    # Convert lists to PyTorch tensors
    hour_t2_sin_tensor = torch.tensor(hour_t2_sin).float().unsqueeze(0)
    hour_t2_cos_tensor = torch.tensor(hour_t2_cos).float().unsqueeze(0)
    yday_t2_sin_tensor = torch.tensor(yday_t2_sin).float().unsqueeze(0)
    yday_t2_cos_tensor = torch.tensor(yday_t2_cos).float().unsqueeze(0)

    # Stack tensors column-wise to create a tensor of shape 
    scalar_covariates = torch.stack([hour_t2_sin_tensor, hour_t2_cos_tensor, yday_t2_sin_tensor, yday_t2_cos_tensor], dim=0)
    scalar_grids = torch.cat([scalar_to_grid(tensor, ndvi_landscape.shape[0], ndvi_landscape.shape[1]) for tensor in scalar_covariates], dim=1)

    full_stack = torch.cat([landscape_stack, scalar_grids], dim=1)
    # print(full_stack.shape)

    landscape_predictions = model.conv_habitat(full_stack)
    # print(landscape_predictions.shape)

    # Assuming the output has the shape [batch_size, channels, height, width]
    # For simplicity, let's visualize the first image in the batch and its first channel
    output_image = landscape_predictions[0].detach().cpu().numpy()

    # mask out cells on the edges that affect the colour scale
    output_image = output_image * x_mask * y_mask

    # Check if output_image is valid before updating global min and max
    if output_image.size > 0:
        # Ignore masked values in the calculation
        valid_values = output_image[np.isfinite(output_image)]
        if valid_values.size > 0:
            current_min = valid_values.min()
            current_max = valid_values.max()
            global_vmin = min(global_vmin, current_min)
            global_vmax = max(global_vmax, current_max)

    predictions_array = np.exp(output_image).flatten()
    df_hourly[f'{hour_t2}'] = predictions_array

    # Save the figure
    filename_landscape_preds = f"outputs/dl_prob_maps_landscape/{buffalo_id}/id{buffalo_id}_landscape_habitat_selection_yday{yday_t2}_hour{hour_t2}.png"
    # filename_landscape_preds = f"outputs/dl_prob_maps_landscape/id{buffalo_id}_landscape_habitat_selection_{hour_t2}_{today_date}.png"
    plt.figure()  # Create a new figure
    # plt.imshow(output_image)
    plt.imshow(output_image, vmin=global_vmin, vmax=global_vmax)
    # plt.imshow(output_image, transform=raster_transform, vmin=global_vmin, vmax=global_vmax)
    plt.colorbar(shrink=0.7)
    plt.title(f'Day of the year: {yday_t2}, Hour: {hour_t2}')
    plt.savefig(filename_landscape_preds, dpi=1000, bbox_inches='tight')
    plt.show()
    plt.close()  # Close the figure to free memory

    # # Save the figure
    # filename_landscape_preds = f"outputs/dl_prob_maps_landscape/{buffalo_id}/id{buffalo_id}_exp_landscape_habitat_selection_yday{yday_t2}_hour{hour_t2}.png"
    # # filename_landscape_preds = f"outputs/dl_prob_maps_landscape/id{buffalo_id}_exp_landscape_habitat_selection_{hour_t2}_{today_date}.png"
    # plt.figure()  # Create a new figure
    # # plt.imshow(np.exp(output_image))
    # plt.imshow(np.exp(output_image), vmin=np.exp(global_vmin), vmax=np.exp(global_vmax))
    # # plt.imshow(np.exp(output_image), transform=raster_transform, vmin=np.exp(global_vmin), vmax=np.exp(global_vmax))
    # plt.colorbar(shrink=0.7)
    # plt.title(f'Day of the year: {yday_t2}, Hour: {hour_t2}')
    # plt.savefig(filename_landscape_preds, dpi=600, bbox_inches='tight')
    # plt.show()
    # plt.close()  # Close the figure to free memory

print(global_vmin, global_vmax)


# Export the DataFrame to a CSV file
df_hourly.to_csv(f"outputs/id{buffalo_id}_hourly_habitat_suitability_landscape_subset_yday{yday_t2}.csv", index=False)

# # Randomly sample rows from the df_hourly DataFrame
# sampled_df_hourly = df_hourly.sample(frac=0.1, random_state=42)  # Adjust the fraction as needed
# # Export the sampled DataFrame to a CSV file
# sampled_df_hourly.to_csv(f"outputs/id{buffalo_id}_hourly_fine_habitat_suitability_landscape_subset.csv", index=False)
No description has been provided for this image
No description has been provided for this image
No description has been provided for this image
No description has been provided for this image
No description has been provided for this image
No description has been provided for this image
No description has been provided for this image
No description has been provided for this image
No description has been provided for this image
No description has been provided for this image
No description has been provided for this image
No description has been provided for this image
No description has been provided for this image
No description has been provided for this image
No description has been provided for this image
No description has been provided for this image
No description has been provided for this image
No description has been provided for this image
No description has been provided for this image
No description has been provided for this image
No description has been provided for this image
No description has been provided for this image
No description has been provided for this image
No description has been provided for this image
-19.016916 -10.933311